| // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 |
| // REQUIRES: riscv-registered-target |
| // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ |
| // RUN: -target-feature +zvfh -disable-O0-optnone \ |
| // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ |
| // RUN: FileCheck --check-prefix=CHECK-RV64 %s |
| |
| #include <riscv_vector.h> |
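
// This file exercises the non-policy __riscv_vmsgeu (unsigned "set if
// greater than or equal") intrinsics across every unsigned element width
// (u8/u16/u32/u64) and LMUL, in both vector-vector (vv) and vector-scalar
// (vx) forms. Each call is expected to lower directly to the matching
// @llvm.riscv.vmsgeu intrinsic, producing a mask register (vbool*_t).
// Note the scalar-operand ABI visible in the vx checks: i8/i16 scalars are
// zero-extended and i32 scalars are sign-extended per the RV64 calling
// convention, while i64 scalars are passed unmodified.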
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgeu_vv_u8mf8_b64 |
| // CHECK-RV64-SAME: (<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] |
| // |
| vbool64_t test_vmsgeu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u8mf8_b64(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgeu_vx_u8mf8_b64 |
| // CHECK-RV64-SAME: (<vscale x 1 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] |
| // |
| vbool64_t test_vmsgeu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u8mf8_b64(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgeu_vv_u8mf4_b32 |
| // CHECK-RV64-SAME: (<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] |
| // |
| vbool32_t test_vmsgeu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u8mf4_b32(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgeu_vx_u8mf4_b32 |
| // CHECK-RV64-SAME: (<vscale x 2 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] |
| // |
| vbool32_t test_vmsgeu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u8mf4_b32(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgeu_vv_u8mf2_b16 |
| // CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] |
| // |
| vbool16_t test_vmsgeu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u8mf2_b16(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgeu_vx_u8mf2_b16 |
| // CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] |
| // |
| vbool16_t test_vmsgeu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u8mf2_b16(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgeu_vv_u8m1_b8 |
| // CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] |
| // |
| vbool8_t test_vmsgeu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u8m1_b8(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgeu_vx_u8m1_b8 |
| // CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] |
| // |
| vbool8_t test_vmsgeu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u8m1_b8(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgeu_vv_u8m2_b4 |
| // CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] |
| // |
| vbool4_t test_vmsgeu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u8m2_b4(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgeu_vx_u8m2_b4 |
| // CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] |
| // |
| vbool4_t test_vmsgeu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u8m2_b4(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmsgeu_vv_u8m4_b2 |
| // CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] |
| // |
| vbool2_t test_vmsgeu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u8m4_b2(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmsgeu_vx_u8m4_b2 |
| // CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] |
| // |
| vbool2_t test_vmsgeu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u8m4_b2(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i1> @test_vmsgeu_vv_u8m8_b1 |
| // CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgeu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]] |
| // |
| vbool1_t test_vmsgeu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u8m8_b1(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i1> @test_vmsgeu_vx_u8m8_b1 |
| // CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgeu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]] |
| // |
| vbool1_t test_vmsgeu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u8m8_b1(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgeu_vv_u16mf4_b64 |
| // CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] |
| // |
| vbool64_t test_vmsgeu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u16mf4_b64(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgeu_vx_u16mf4_b64 |
| // CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] |
| // |
| vbool64_t test_vmsgeu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u16mf4_b64(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgeu_vv_u16mf2_b32 |
| // CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] |
| // |
| vbool32_t test_vmsgeu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u16mf2_b32(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgeu_vx_u16mf2_b32 |
| // CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] |
| // |
| vbool32_t test_vmsgeu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u16mf2_b32(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgeu_vv_u16m1_b16 |
| // CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] |
| // |
| vbool16_t test_vmsgeu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u16m1_b16(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgeu_vx_u16m1_b16 |
| // CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] |
| // |
| vbool16_t test_vmsgeu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u16m1_b16(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgeu_vv_u16m2_b8 |
| // CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] |
| // |
| vbool8_t test_vmsgeu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u16m2_b8(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgeu_vx_u16m2_b8 |
| // CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] |
| // |
| vbool8_t test_vmsgeu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u16m2_b8(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgeu_vv_u16m4_b4 |
| // CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] |
| // |
| vbool4_t test_vmsgeu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u16m4_b4(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgeu_vx_u16m4_b4 |
| // CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] |
| // |
| vbool4_t test_vmsgeu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u16m4_b4(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmsgeu_vv_u16m8_b2 |
| // CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] |
| // |
| vbool2_t test_vmsgeu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u16m8_b2(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmsgeu_vx_u16m8_b2 |
| // CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] |
| // |
| vbool2_t test_vmsgeu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u16m8_b2(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgeu_vv_u32mf2_b64 |
| // CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] |
| // |
| vbool64_t test_vmsgeu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u32mf2_b64(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgeu_vx_u32mf2_b64 |
| // CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] |
| // |
| vbool64_t test_vmsgeu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u32mf2_b64(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgeu_vv_u32m1_b32 |
| // CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] |
| // |
| vbool32_t test_vmsgeu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u32m1_b32(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgeu_vx_u32m1_b32 |
| // CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] |
| // |
| vbool32_t test_vmsgeu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u32m1_b32(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgeu_vv_u32m2_b16 |
| // CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] |
| // |
| vbool16_t test_vmsgeu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u32m2_b16(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgeu_vx_u32m2_b16 |
| // CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] |
| // |
| vbool16_t test_vmsgeu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u32m2_b16(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgeu_vv_u32m4_b8 |
| // CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] |
| // |
| vbool8_t test_vmsgeu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u32m4_b8(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgeu_vx_u32m4_b8 |
| // CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] |
| // |
| vbool8_t test_vmsgeu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u32m4_b8(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgeu_vv_u32m8_b4 |
| // CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] |
| // |
| vbool4_t test_vmsgeu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u32m8_b4(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgeu_vx_u32m8_b4 |
| // CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] |
| // |
| vbool4_t test_vmsgeu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u32m8_b4(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgeu_vv_u64m1_b64 |
| // CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] |
| // |
| vbool64_t test_vmsgeu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u64m1_b64(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgeu_vx_u64m1_b64 |
| // CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] |
| // |
| vbool64_t test_vmsgeu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u64m1_b64(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgeu_vv_u64m2_b32 |
| // CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] |
| // |
| vbool32_t test_vmsgeu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u64m2_b32(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgeu_vx_u64m2_b32 |
| // CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] |
| // |
| vbool32_t test_vmsgeu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u64m2_b32(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgeu_vv_u64m4_b16 |
| // CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] |
| // |
| vbool16_t test_vmsgeu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u64m4_b16(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgeu_vx_u64m4_b16 |
| // CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] |
| // |
| vbool16_t test_vmsgeu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u64m4_b16(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgeu_vv_u64m8_b8 |
| // CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] |
| // |
| vbool8_t test_vmsgeu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u64m8_b8(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgeu_vx_u64m8_b8 |
| // CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] |
| // |
| vbool8_t test_vmsgeu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u64m8_b8(op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgeu_vv_u8mf8_b64_m |
| // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] |
| // |
| vbool64_t test_vmsgeu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u8mf8_b64_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgeu_vx_u8mf8_b64_m |
| // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> poison, <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] |
| // |
| vbool64_t test_vmsgeu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u8mf8_b64_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgeu_vv_u8mf4_b32_m |
| // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] |
| // |
| vbool32_t test_vmsgeu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u8mf4_b32_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgeu_vx_u8mf4_b32_m |
| // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> poison, <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] |
| // |
| vbool32_t test_vmsgeu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u8mf4_b32_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgeu_vv_u8mf2_b16_m |
| // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] |
| // |
| vbool16_t test_vmsgeu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u8mf2_b16_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgeu_vx_u8mf2_b16_m |
| // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> poison, <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] |
| // |
| vbool16_t test_vmsgeu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u8mf2_b16_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgeu_vv_u8m1_b8_m |
| // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] |
| // |
| vbool8_t test_vmsgeu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u8m1_b8_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgeu_vx_u8m1_b8_m |
| // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> poison, <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] |
| // |
| vbool8_t test_vmsgeu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u8m1_b8_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgeu_vv_u8m2_b4_m |
| // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] |
| // |
| vbool4_t test_vmsgeu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u8m2_b4_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgeu_vx_u8m2_b4_m |
| // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> poison, <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] |
| // |
| vbool4_t test_vmsgeu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u8m2_b4_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmsgeu_vv_u8m4_b2_m |
| // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] |
| // |
| vbool2_t test_vmsgeu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u8m4_b2_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmsgeu_vx_u8m4_b2_m |
| // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> poison, <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] |
| // |
| vbool2_t test_vmsgeu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u8m4_b2_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i1> @test_vmsgeu_vv_u8m8_b1_m |
| // CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgeu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]] |
| // |
| vbool1_t test_vmsgeu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u8m8_b1_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i1> @test_vmsgeu_vx_u8m8_b1_m |
| // CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgeu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> poison, <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]] |
| // |
| vbool1_t test_vmsgeu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u8m8_b1_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgeu_vv_u16mf4_b64_m |
| // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] |
| // |
| vbool64_t test_vmsgeu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u16mf4_b64_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgeu_vx_u16mf4_b64_m |
| // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> poison, <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] |
| // |
| vbool64_t test_vmsgeu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u16mf4_b64_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgeu_vv_u16mf2_b32_m |
| // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] |
| // |
| vbool32_t test_vmsgeu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u16mf2_b32_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgeu_vx_u16mf2_b32_m |
| // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> poison, <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] |
| // |
| vbool32_t test_vmsgeu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u16mf2_b32_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgeu_vv_u16m1_b16_m |
| // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] |
| // |
| vbool16_t test_vmsgeu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u16m1_b16_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgeu_vx_u16m1_b16_m |
| // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> poison, <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] |
| // |
| vbool16_t test_vmsgeu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u16m1_b16_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgeu_vv_u16m2_b8_m |
| // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] |
| // |
| vbool8_t test_vmsgeu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u16m2_b8_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgeu_vx_u16m2_b8_m |
| // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> poison, <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] |
| // |
| vbool8_t test_vmsgeu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u16m2_b8_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgeu_vv_u16m4_b4_m |
| // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] |
| // |
| vbool4_t test_vmsgeu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u16m4_b4_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgeu_vx_u16m4_b4_m |
| // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> poison, <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] |
| // |
| vbool4_t test_vmsgeu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u16m4_b4_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmsgeu_vv_u16m8_b2_m |
| // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] |
| // |
| vbool2_t test_vmsgeu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u16m8_b2_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmsgeu_vx_u16m8_b2_m |
| // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> poison, <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] |
| // |
| vbool2_t test_vmsgeu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u16m8_b2_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgeu_vv_u32mf2_b64_m |
| // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] |
| // |
| vbool64_t test_vmsgeu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u32mf2_b64_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgeu_vx_u32mf2_b64_m |
| // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> poison, <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] |
| // |
| vbool64_t test_vmsgeu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u32mf2_b64_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgeu_vv_u32m1_b32_m |
| // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] |
| // |
| vbool32_t test_vmsgeu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u32m1_b32_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgeu_vx_u32m1_b32_m |
| // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> poison, <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] |
| // |
| vbool32_t test_vmsgeu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u32m1_b32_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgeu_vv_u32m2_b16_m |
| // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] |
| // |
| vbool16_t test_vmsgeu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u32m2_b16_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgeu_vx_u32m2_b16_m |
| // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> poison, <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] |
| // |
| vbool16_t test_vmsgeu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u32m2_b16_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgeu_vv_u32m4_b8_m |
| // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] |
| // |
| vbool8_t test_vmsgeu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u32m4_b8_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgeu_vx_u32m4_b8_m |
| // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> poison, <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] |
| // |
| vbool8_t test_vmsgeu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u32m4_b8_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgeu_vv_u32m8_b4_m |
| // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] |
| // |
| vbool4_t test_vmsgeu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u32m8_b4_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmsgeu_vx_u32m8_b4_m |
| // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> poison, <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] |
| // |
| vbool4_t test_vmsgeu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u32m8_b4_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgeu_vv_u64m1_b64_m |
| // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] |
| // |
| vbool64_t test_vmsgeu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u64m1_b64_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmsgeu_vx_u64m1_b64_m |
| // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> poison, <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] |
| // |
| vbool64_t test_vmsgeu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u64m1_b64_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgeu_vv_u64m2_b32_m |
| // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] |
| // |
| vbool32_t test_vmsgeu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u64m2_b32_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmsgeu_vx_u64m2_b32_m |
| // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> poison, <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] |
| // |
| vbool32_t test_vmsgeu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u64m2_b32_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgeu_vv_u64m4_b16_m |
| // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] |
| // |
| vbool16_t test_vmsgeu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u64m4_b16_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmsgeu_vx_u64m4_b16_m |
| // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> poison, <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] |
| // |
| vbool16_t test_vmsgeu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u64m4_b16_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgeu_vv_u64m8_b8_m |
| // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] |
| // |
| vbool8_t test_vmsgeu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { |
| return __riscv_vmsgeu_vv_u64m8_b8_m(mask, op1, op2, vl); |
| } |
| |
| // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmsgeu_vx_u64m8_b8_m |
| // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> poison, <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] |
| // |
| vbool8_t test_vmsgeu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { |
| return __riscv_vmsgeu_vx_u64m8_b8_m(mask, op1, op2, vl); |
| } |
| |