| // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 |
| // REQUIRES: riscv-registered-target |
| // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ |
| // RUN: -target-feature +zvfh -disable-O0-optnone \ |
| // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ |
| // RUN: FileCheck --check-prefix=CHECK-RV64 %s |
| |
| #include <riscv_vector.h> |
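// These tests exercise the tail-undisturbed (_tu) overloads of the vmerge
// intrinsics. For element indices i < vl the result is a mask-driven select,
//   result[i] = mask[i] ? op2[i] : op1[i]
// while tail elements (i >= vl) keep the values of the maskedoff operand.
// The vvm forms take a vector second operand; the vxm forms take a scalar.
//
// Illustrative sketch only (not covered by the autogenerated assertions
// below): a typical call site blending two vectors under a mask while
// preserving the destination's tail.
static inline vint32m1_t blend_example(vint32m1_t maskedoff, vint32m1_t a,
                                       vint32m1_t b, vbool32_t m, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, a, b, m, vl);
}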
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vmerge_vvm_i8mf8_tu |
| // CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]] |
| // |
| vint8mf8_t test_vmerge_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vmerge_vxm_i8mf8_tu |
| // CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]] |
| // |
| vint8mf8_t test_vmerge_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vmerge_vvm_i8mf4_tu |
| // CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]] |
| // |
| vint8mf4_t test_vmerge_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vmerge_vxm_i8mf4_tu |
| // CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]] |
| // |
| vint8mf4_t test_vmerge_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vmerge_vvm_i8mf2_tu |
| // CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]] |
| // |
| vint8mf2_t test_vmerge_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vmerge_vxm_i8mf2_tu |
| // CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]] |
| // |
| vint8mf2_t test_vmerge_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vmerge_vvm_i8m1_tu |
| // CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]] |
| // |
| vint8m1_t test_vmerge_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vmerge_vxm_i8m1_tu |
| // CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]] |
| // |
| vint8m1_t test_vmerge_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vmerge_vvm_i8m2_tu |
| // CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]] |
| // |
| vint8m2_t test_vmerge_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vmerge_vxm_i8m2_tu |
| // CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]] |
| // |
| vint8m2_t test_vmerge_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vmerge_vvm_i8m4_tu |
| // CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]] |
| // |
| vint8m4_t test_vmerge_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vmerge_vxm_i8m4_tu |
| // CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]] |
| // |
| vint8m4_t test_vmerge_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vmerge_vvm_i8m8_tu |
| // CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]] |
| // |
| vint8m8_t test_vmerge_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vmerge_vxm_i8m8_tu |
| // CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]] |
| // |
| vint8m8_t test_vmerge_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vmerge_vvm_i16mf4_tu |
| // CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| vint16mf4_t test_vmerge_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vmerge_vxm_i16mf4_tu |
| // CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| vint16mf4_t test_vmerge_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vmerge_vvm_i16mf2_tu |
| // CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| vint16mf2_t test_vmerge_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vmerge_vxm_i16mf2_tu |
| // CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| vint16mf2_t test_vmerge_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vmerge_vvm_i16m1_tu |
| // CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| vint16m1_t test_vmerge_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vmerge_vxm_i16m1_tu |
| // CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| vint16m1_t test_vmerge_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vmerge_vvm_i16m2_tu |
| // CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] |
| // |
| vint16m2_t test_vmerge_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vmerge_vxm_i16m2_tu |
| // CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] |
| // |
| vint16m2_t test_vmerge_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vmerge_vvm_i16m4_tu |
| // CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] |
| // |
| vint16m4_t test_vmerge_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vmerge_vxm_i16m4_tu |
| // CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] |
| // |
| vint16m4_t test_vmerge_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vmerge_vvm_i16m8_tu |
| // CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] |
| // |
| vint16m8_t test_vmerge_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vmerge_vxm_i16m8_tu |
| // CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]] |
| // |
| vint16m8_t test_vmerge_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vmerge_vvm_i32mf2_tu |
| // CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] |
| // |
| vint32mf2_t test_vmerge_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vmerge_vxm_i32mf2_tu |
| // CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] |
| // |
| vint32mf2_t test_vmerge_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vmerge_vvm_i32m1_tu |
| // CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] |
| // |
| vint32m1_t test_vmerge_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vmerge_vxm_i32m1_tu |
| // CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] |
| // |
| vint32m1_t test_vmerge_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vmerge_vvm_i32m2_tu |
| // CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] |
| // |
| vint32m2_t test_vmerge_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vmerge_vxm_i32m2_tu |
| // CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] |
| // |
| vint32m2_t test_vmerge_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vmerge_vvm_i32m4_tu |
| // CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] |
| // |
| vint32m4_t test_vmerge_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vmerge_vxm_i32m4_tu |
| // CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] |
| // |
| vint32m4_t test_vmerge_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vmerge_vvm_i32m8_tu |
| // CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] |
| // |
| vint32m8_t test_vmerge_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vmerge_vxm_i32m8_tu |
| // CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]] |
| // |
| vint32m8_t test_vmerge_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vmerge_vvm_i64m1_tu |
| // CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] |
| // |
| vint64m1_t test_vmerge_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vmerge_vxm_i64m1_tu |
| // CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] |
| // |
| vint64m1_t test_vmerge_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vmerge_vvm_i64m2_tu |
| // CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] |
| // |
| vint64m2_t test_vmerge_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vmerge_vxm_i64m2_tu |
| // CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] |
| // |
| vint64m2_t test_vmerge_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vmerge_vvm_i64m4_tu |
| // CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] |
| // |
| vint64m4_t test_vmerge_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vmerge_vxm_i64m4_tu |
| // CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] |
| // |
| vint64m4_t test_vmerge_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vmerge_vvm_i64m8_tu |
| // CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] |
| // |
| vint64m8_t test_vmerge_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vmerge_vxm_i64m8_tu |
| // CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] |
| // |
| vint64m8_t test_vmerge_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vmerge_vvm_u8mf8_tu |
| // CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]] |
| // |
| vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vmerge_vxm_u8mf8_tu |
| // CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]] |
| // |
| vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vmerge_vvm_u8mf4_tu |
| // CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]] |
| // |
| vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vmerge_vxm_u8mf4_tu |
| // CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]] |
| // |
| vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vmerge_vvm_u8mf2_tu |
| // CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]] |
| // |
| vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vmerge_vxm_u8mf2_tu |
| // CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]] |
| // |
| vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vmerge_vvm_u8m1_tu |
| // CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]] |
| // |
| vuint8m1_t test_vmerge_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vmerge_vxm_u8m1_tu |
| // CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]] |
| // |
| vuint8m1_t test_vmerge_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vmerge_vvm_u8m2_tu |
| // CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]] |
| // |
| vuint8m2_t test_vmerge_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vmerge_vxm_u8m2_tu |
| // CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]] |
| // |
| vuint8m2_t test_vmerge_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vmerge_vvm_u8m4_tu |
| // CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]] |
| // |
| vuint8m4_t test_vmerge_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vmerge_vxm_u8m4_tu |
| // CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]] |
| // |
| vuint8m4_t test_vmerge_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vmerge_vvm_u8m8_tu |
| // CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]] |
| // |
| vuint8m8_t test_vmerge_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vmerge_vxm_u8m8_tu |
| // CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]] |
| // |
| vuint8m8_t test_vmerge_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vmerge_vvm_u16mf4_tu |
| // CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vmerge_vxm_u16mf4_tu |
| // CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vmerge_vvm_u16mf2_tu |
| // CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vmerge_vxm_u16mf2_tu |
| // CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vmerge_vvm_u16m1_tu |
| // CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| vuint16m1_t test_vmerge_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vmerge_vxm_u16m1_tu |
| // CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| vuint16m1_t test_vmerge_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vmerge_vvm_u16m2_tu |
| // CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] |
| // |
| vuint16m2_t test_vmerge_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vmerge_vxm_u16m2_tu |
| // CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] |
| // |
| vuint16m2_t test_vmerge_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { |
| return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vmerge_vvm_u16m4_tu |
| // CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] |
| // |
| vuint16m4_t test_vmerge_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { |
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vmerge_vxm_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmerge_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vmerge_vvm_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmerge_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vmerge_vxm_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmerge_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vmerge_vvm_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vmerge_vxm_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vmerge_vvm_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmerge_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vmerge_vxm_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmerge_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vmerge_vvm_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmerge_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vmerge_vxm_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmerge_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vmerge_vvm_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmerge_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vmerge_vxm_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmerge_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vmerge_vvm_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmerge_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vmerge_vxm_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmerge_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vmerge_vvm_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmerge_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vmerge_vxm_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmerge_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vmerge_vvm_u64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmerge_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vmerge_vxm_u64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmerge_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vmerge_vvm_u64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmerge_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vmerge_vxm_u64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmerge_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vmerge_vvm_u64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmerge_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vmerge_vxm_u64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmerge_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vmerge_vvm_f16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vmerge_vvm_f16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vmerge_vvm_f16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vmerge_vvm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vmerge_vvm_f16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vmerge_vvm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vmerge_vvm_f16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vmerge_vvm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vmerge_vvm_f16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vmerge_vvm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vmerge_vvm_f32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vmerge_vvm_f32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vmerge_vvm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vmerge_vvm_f32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vmerge_vvm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vmerge_vvm_f32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vmerge_vvm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vmerge_vvm_f32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vmerge_vvm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vmerge_vvm_f64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vmerge_vvm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vmerge_vvm_f64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vmerge_vvm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vmerge_vvm_f64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vmerge_vvm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vmerge_vvm_f64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) {
  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
}
