// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvfh \
// RUN: -O0 -emit-llvm %s -o - | FileCheck %s
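// Note: +zvfh is what enables the vfloat16* typedefs declared below; +v
// covers the remaining integer and floating-point vector types.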
#include <riscv_vector.h>
// This test case tests the typedefs generated in riscv_vector.h.
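// A quick legend for the naming scheme (informational; the names come from
// the RVV intrinsics spec): v<int|uint|float><SEW><LMUL>_t is a vector of
// SEW-bit elements with register-group multiplier LMUL (mf8/mf4/mf2
// fractional, m1/m2/m4/m8 whole groups), and vbool<N>_t is a mask type with
// one mask bit per N bits of data (N = SEW/LMUL).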
// CHECK-LABEL: define dso_local void @_Z3foov
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[B64:%.*]] = alloca <vscale x 1 x i1>, align 1
// CHECK-NEXT: [[B32:%.*]] = alloca <vscale x 2 x i1>, align 1
// CHECK-NEXT: [[B16:%.*]] = alloca <vscale x 4 x i1>, align 1
// CHECK-NEXT: [[B8:%.*]] = alloca <vscale x 8 x i1>, align 1
// CHECK-NEXT: [[I8MF8:%.*]] = alloca <vscale x 1 x i8>, align 1
// CHECK-NEXT: [[I8MF4:%.*]] = alloca <vscale x 2 x i8>, align 1
// CHECK-NEXT: [[I8MF2:%.*]] = alloca <vscale x 4 x i8>, align 1
// CHECK-NEXT: [[I8M1:%.*]] = alloca <vscale x 8 x i8>, align 1
// CHECK-NEXT: [[I8M2:%.*]] = alloca <vscale x 16 x i8>, align 1
// CHECK-NEXT: [[I8M4:%.*]] = alloca <vscale x 32 x i8>, align 1
// CHECK-NEXT: [[I8M8:%.*]] = alloca <vscale x 64 x i8>, align 1
// CHECK-NEXT: [[U8MF8:%.*]] = alloca <vscale x 1 x i8>, align 1
// CHECK-NEXT: [[U8MF4:%.*]] = alloca <vscale x 2 x i8>, align 1
// CHECK-NEXT: [[U8MF2:%.*]] = alloca <vscale x 4 x i8>, align 1
// CHECK-NEXT: [[U8M1:%.*]] = alloca <vscale x 8 x i8>, align 1
// CHECK-NEXT: [[U8M2:%.*]] = alloca <vscale x 16 x i8>, align 1
// CHECK-NEXT: [[U8M4:%.*]] = alloca <vscale x 32 x i8>, align 1
// CHECK-NEXT: [[U8M8:%.*]] = alloca <vscale x 64 x i8>, align 1
// CHECK-NEXT: [[I16MF4:%.*]] = alloca <vscale x 1 x i16>, align 2
// CHECK-NEXT: [[I16MF2:%.*]] = alloca <vscale x 2 x i16>, align 2
// CHECK-NEXT: [[I16M1:%.*]] = alloca <vscale x 4 x i16>, align 2
// CHECK-NEXT: [[I16M2:%.*]] = alloca <vscale x 8 x i16>, align 2
// CHECK-NEXT: [[I16M4:%.*]] = alloca <vscale x 16 x i16>, align 2
// CHECK-NEXT: [[I16M8:%.*]] = alloca <vscale x 32 x i16>, align 2
// CHECK-NEXT: [[U16MF4:%.*]] = alloca <vscale x 1 x i16>, align 2
// CHECK-NEXT: [[U16MF2:%.*]] = alloca <vscale x 2 x i16>, align 2
// CHECK-NEXT: [[U16M1:%.*]] = alloca <vscale x 4 x i16>, align 2
// CHECK-NEXT: [[U16M2:%.*]] = alloca <vscale x 8 x i16>, align 2
// CHECK-NEXT: [[U16M4:%.*]] = alloca <vscale x 16 x i16>, align 2
// CHECK-NEXT: [[U16M8:%.*]] = alloca <vscale x 32 x i16>, align 2
// CHECK-NEXT: [[I32MF2:%.*]] = alloca <vscale x 1 x i32>, align 4
// CHECK-NEXT: [[I32M1:%.*]] = alloca <vscale x 2 x i32>, align 4
// CHECK-NEXT: [[I32M2:%.*]] = alloca <vscale x 4 x i32>, align 4
// CHECK-NEXT: [[I32M4:%.*]] = alloca <vscale x 8 x i32>, align 4
// CHECK-NEXT: [[I32M8:%.*]] = alloca <vscale x 16 x i32>, align 4
// CHECK-NEXT: [[U32MF2:%.*]] = alloca <vscale x 1 x i32>, align 4
// CHECK-NEXT: [[U32M1:%.*]] = alloca <vscale x 2 x i32>, align 4
// CHECK-NEXT: [[U32M2:%.*]] = alloca <vscale x 4 x i32>, align 4
// CHECK-NEXT: [[U32M4:%.*]] = alloca <vscale x 8 x i32>, align 4
// CHECK-NEXT: [[U32M8:%.*]] = alloca <vscale x 16 x i32>, align 4
// CHECK-NEXT: [[I64M1:%.*]] = alloca <vscale x 1 x i64>, align 8
// CHECK-NEXT: [[I64M2:%.*]] = alloca <vscale x 2 x i64>, align 8
// CHECK-NEXT: [[I64M4:%.*]] = alloca <vscale x 4 x i64>, align 8
// CHECK-NEXT: [[I64M8:%.*]] = alloca <vscale x 8 x i64>, align 8
// CHECK-NEXT: [[U64M1:%.*]] = alloca <vscale x 1 x i64>, align 8
// CHECK-NEXT: [[U64M2:%.*]] = alloca <vscale x 2 x i64>, align 8
// CHECK-NEXT: [[U64M4:%.*]] = alloca <vscale x 4 x i64>, align 8
// CHECK-NEXT: [[U64M8:%.*]] = alloca <vscale x 8 x i64>, align 8
// CHECK-NEXT: [[F16MF4:%.*]] = alloca <vscale x 1 x half>, align 2
// CHECK-NEXT: [[F16MF2:%.*]] = alloca <vscale x 2 x half>, align 2
// CHECK-NEXT: [[F16M1:%.*]] = alloca <vscale x 4 x half>, align 2
// CHECK-NEXT: [[F16M2:%.*]] = alloca <vscale x 8 x half>, align 2
// CHECK-NEXT: [[F16M4:%.*]] = alloca <vscale x 16 x half>, align 2
// CHECK-NEXT: [[F16M8:%.*]] = alloca <vscale x 32 x half>, align 2
// CHECK-NEXT: [[F32MF2:%.*]] = alloca <vscale x 1 x float>, align 4
// CHECK-NEXT: [[F32M1:%.*]] = alloca <vscale x 2 x float>, align 4
// CHECK-NEXT: [[F32M2:%.*]] = alloca <vscale x 4 x float>, align 4
// CHECK-NEXT: [[F32M4:%.*]] = alloca <vscale x 8 x float>, align 4
// CHECK-NEXT: [[F32M8:%.*]] = alloca <vscale x 16 x float>, align 4
// CHECK-NEXT: [[F64M1:%.*]] = alloca <vscale x 1 x double>, align 8
// CHECK-NEXT: [[F64M2:%.*]] = alloca <vscale x 2 x double>, align 8
// CHECK-NEXT: [[F64M4:%.*]] = alloca <vscale x 4 x double>, align 8
// CHECK-NEXT: [[F64M8:%.*]] = alloca <vscale x 8 x double>, align 8
// CHECK-NEXT: [[I8MF8X2:%.*]] = alloca { <vscale x 1 x i8>, <vscale x 1 x i8> }, align 1
// CHECK-NEXT: [[I8MF8X3:%.*]] = alloca { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> }, align 1
// CHECK-NEXT: [[I8MF8X4:%.*]] = alloca { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> }, align 1
// CHECK-NEXT: [[I8MF8X5:%.*]] = alloca { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> }, align 1
// CHECK-NEXT: [[I8MF8X6:%.*]] = alloca { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> }, align 1
// CHECK-NEXT: [[I8MF8X7:%.*]] = alloca { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> }, align 1
// CHECK-NEXT: [[I8MF8X8:%.*]] = alloca { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> }, align 1
// CHECK-NEXT: [[I8MF4X2:%.*]] = alloca { <vscale x 2 x i8>, <vscale x 2 x i8> }, align 1
// CHECK-NEXT: [[I8MF4X3:%.*]] = alloca { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> }, align 1
// CHECK-NEXT: [[I8MF4X4:%.*]] = alloca { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> }, align 1
// CHECK-NEXT: [[I8MF4X5:%.*]] = alloca { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> }, align 1
// CHECK-NEXT: [[I8MF4X6:%.*]] = alloca { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> }, align 1
// CHECK-NEXT: [[I8MF4X7:%.*]] = alloca { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> }, align 1
// CHECK-NEXT: [[I8MF4X8:%.*]] = alloca { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> }, align 1
// CHECK-NEXT: [[I8MF2X2:%.*]] = alloca { <vscale x 4 x i8>, <vscale x 4 x i8> }, align 1
// CHECK-NEXT: [[I8MF2X3:%.*]] = alloca { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> }, align 1
// CHECK-NEXT: [[I8MF2X4:%.*]] = alloca { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> }, align 1
// CHECK-NEXT: [[I8MF2X5:%.*]] = alloca { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> }, align 1
// CHECK-NEXT: [[I8MF2X6:%.*]] = alloca { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> }, align 1
// CHECK-NEXT: [[I8MF2X7:%.*]] = alloca { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> }, align 1
// CHECK-NEXT: [[I8MF2X8:%.*]] = alloca { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> }, align 1
// CHECK-NEXT: [[I8M1X2:%.*]] = alloca { <vscale x 8 x i8>, <vscale x 8 x i8> }, align 1
// CHECK-NEXT: [[I8M1X3:%.*]] = alloca { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> }, align 1
// CHECK-NEXT: [[I8M1X4:%.*]] = alloca { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> }, align 1
// CHECK-NEXT: [[I8M1X5:%.*]] = alloca { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> }, align 1
// CHECK-NEXT: [[I8M1X6:%.*]] = alloca { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> }, align 1
// CHECK-NEXT: [[I8M1X7:%.*]] = alloca { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> }, align 1
// CHECK-NEXT: [[I8M1X8:%.*]] = alloca { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> }, align 1
// CHECK-NEXT: [[I8M2X2:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 1
// CHECK-NEXT: [[I8M2X3:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 1
// CHECK-NEXT: [[I8M2X4:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 1
// CHECK-NEXT: [[I8M4X2:%.*]] = alloca { <vscale x 32 x i8>, <vscale x 32 x i8> }, align 1
// CHECK-NEXT: [[U8MF8X2:%.*]] = alloca { <vscale x 1 x i8>, <vscale x 1 x i8> }, align 1
// CHECK-NEXT: [[U8MF8X3:%.*]] = alloca { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> }, align 1
// CHECK-NEXT: [[U8MF8X4:%.*]] = alloca { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> }, align 1
// CHECK-NEXT: [[U8MF8X5:%.*]] = alloca { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> }, align 1
// CHECK-NEXT: [[U8MF8X6:%.*]] = alloca { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> }, align 1
// CHECK-NEXT: [[U8MF8X7:%.*]] = alloca { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> }, align 1
// CHECK-NEXT: [[U8MF8X8:%.*]] = alloca { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> }, align 1
// CHECK-NEXT: [[U8MF4X2:%.*]] = alloca { <vscale x 2 x i8>, <vscale x 2 x i8> }, align 1
// CHECK-NEXT: [[U8MF4X3:%.*]] = alloca { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> }, align 1
// CHECK-NEXT: [[U8MF4X4:%.*]] = alloca { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> }, align 1
// CHECK-NEXT: [[U8MF4X5:%.*]] = alloca { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> }, align 1
// CHECK-NEXT: [[U8MF4X6:%.*]] = alloca { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> }, align 1
// CHECK-NEXT: [[U8MF4X7:%.*]] = alloca { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> }, align 1
// CHECK-NEXT: [[U8MF4X8:%.*]] = alloca { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> }, align 1
// CHECK-NEXT: [[U8MF2X2:%.*]] = alloca { <vscale x 4 x i8>, <vscale x 4 x i8> }, align 1
// CHECK-NEXT: [[U8MF2X3:%.*]] = alloca { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> }, align 1
// CHECK-NEXT: [[U8MF2X4:%.*]] = alloca { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> }, align 1
// CHECK-NEXT: [[U8MF2X5:%.*]] = alloca { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> }, align 1
// CHECK-NEXT: [[U8MF2X6:%.*]] = alloca { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> }, align 1
// CHECK-NEXT: [[U8MF2X7:%.*]] = alloca { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> }, align 1
// CHECK-NEXT: [[U8MF2X8:%.*]] = alloca { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> }, align 1
// CHECK-NEXT: [[U8M1X2:%.*]] = alloca { <vscale x 8 x i8>, <vscale x 8 x i8> }, align 1
// CHECK-NEXT: [[U8M1X3:%.*]] = alloca { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> }, align 1
// CHECK-NEXT: [[U8M1X4:%.*]] = alloca { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> }, align 1
// CHECK-NEXT: [[U8M1X5:%.*]] = alloca { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> }, align 1
// CHECK-NEXT: [[U8M1X6:%.*]] = alloca { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> }, align 1
// CHECK-NEXT: [[U8M1X7:%.*]] = alloca { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> }, align 1
// CHECK-NEXT: [[U8M1X8:%.*]] = alloca { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> }, align 1
// CHECK-NEXT: [[U8M2X2:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 1
// CHECK-NEXT: [[U8M2X3:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 1
// CHECK-NEXT: [[U8M2X4:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 1
// CHECK-NEXT: [[U8M4X2:%.*]] = alloca { <vscale x 32 x i8>, <vscale x 32 x i8> }, align 1
// CHECK-NEXT: [[I16MF4X2:%.*]] = alloca { <vscale x 1 x i16>, <vscale x 1 x i16> }, align 2
// CHECK-NEXT: [[I16MF4X3:%.*]] = alloca { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> }, align 2
// CHECK-NEXT: [[I16MF4X4:%.*]] = alloca { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> }, align 2
// CHECK-NEXT: [[I16MF4X5:%.*]] = alloca { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> }, align 2
// CHECK-NEXT: [[I16MF4X6:%.*]] = alloca { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> }, align 2
// CHECK-NEXT: [[I16MF4X7:%.*]] = alloca { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> }, align 2
// CHECK-NEXT: [[I16MF4X8:%.*]] = alloca { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> }, align 2
// CHECK-NEXT: [[I16MF2X2:%.*]] = alloca { <vscale x 2 x i16>, <vscale x 2 x i16> }, align 2
// CHECK-NEXT: [[I16MF2X3:%.*]] = alloca { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> }, align 2
// CHECK-NEXT: [[I16MF2X4:%.*]] = alloca { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> }, align 2
// CHECK-NEXT: [[I16MF2X5:%.*]] = alloca { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> }, align 2
// CHECK-NEXT: [[I16MF2X6:%.*]] = alloca { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> }, align 2
// CHECK-NEXT: [[I16MF2X7:%.*]] = alloca { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> }, align 2
// CHECK-NEXT: [[I16MF2X8:%.*]] = alloca { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> }, align 2
// CHECK-NEXT: [[I16M1X2:%.*]] = alloca { <vscale x 4 x i16>, <vscale x 4 x i16> }, align 2
// CHECK-NEXT: [[I16M1X3:%.*]] = alloca { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> }, align 2
// CHECK-NEXT: [[I16M1X4:%.*]] = alloca { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> }, align 2
// CHECK-NEXT: [[I16M1X5:%.*]] = alloca { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> }, align 2
// CHECK-NEXT: [[I16M1X6:%.*]] = alloca { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> }, align 2
// CHECK-NEXT: [[I16M1X7:%.*]] = alloca { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> }, align 2
// CHECK-NEXT: [[I16M1X8:%.*]] = alloca { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> }, align 2
// CHECK-NEXT: [[I16M2X2:%.*]] = alloca { <vscale x 8 x i16>, <vscale x 8 x i16> }, align 2
// CHECK-NEXT: [[I16M2X3:%.*]] = alloca { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> }, align 2
// CHECK-NEXT: [[I16M2X4:%.*]] = alloca { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> }, align 2
// CHECK-NEXT: [[I16M4X2:%.*]] = alloca { <vscale x 16 x i16>, <vscale x 16 x i16> }, align 2
// CHECK-NEXT: [[U16MF4X2:%.*]] = alloca { <vscale x 1 x i16>, <vscale x 1 x i16> }, align 2
// CHECK-NEXT: [[U16MF4X3:%.*]] = alloca { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> }, align 2
// CHECK-NEXT: [[U16MF4X4:%.*]] = alloca { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> }, align 2
// CHECK-NEXT: [[U16MF4X5:%.*]] = alloca { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> }, align 2
// CHECK-NEXT: [[U16MF4X6:%.*]] = alloca { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> }, align 2
// CHECK-NEXT: [[U16MF4X7:%.*]] = alloca { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> }, align 2
// CHECK-NEXT: [[U16MF4X8:%.*]] = alloca { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> }, align 2
// CHECK-NEXT: [[U16MF2X2:%.*]] = alloca { <vscale x 2 x i16>, <vscale x 2 x i16> }, align 2
// CHECK-NEXT: [[U16MF2X3:%.*]] = alloca { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> }, align 2
// CHECK-NEXT: [[U16MF2X4:%.*]] = alloca { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> }, align 2
// CHECK-NEXT: [[U16MF2X5:%.*]] = alloca { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> }, align 2
// CHECK-NEXT: [[U16MF2X6:%.*]] = alloca { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> }, align 2
// CHECK-NEXT: [[U16MF2X7:%.*]] = alloca { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> }, align 2
// CHECK-NEXT: [[U16MF2X8:%.*]] = alloca { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> }, align 2
// CHECK-NEXT: [[U16M1X2:%.*]] = alloca { <vscale x 4 x i16>, <vscale x 4 x i16> }, align 2
// CHECK-NEXT: [[U16M1X3:%.*]] = alloca { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> }, align 2
// CHECK-NEXT: [[U16M1X4:%.*]] = alloca { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> }, align 2
// CHECK-NEXT: [[U16M1X5:%.*]] = alloca { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> }, align 2
// CHECK-NEXT: [[U16M1X6:%.*]] = alloca { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> }, align 2
// CHECK-NEXT: [[U16M1X7:%.*]] = alloca { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> }, align 2
// CHECK-NEXT: [[U16M1X8:%.*]] = alloca { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> }, align 2
// CHECK-NEXT: [[U16M2X2:%.*]] = alloca { <vscale x 8 x i16>, <vscale x 8 x i16> }, align 2
// CHECK-NEXT: [[U16M2X3:%.*]] = alloca { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> }, align 2
// CHECK-NEXT: [[U16M2X4:%.*]] = alloca { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> }, align 2
// CHECK-NEXT: [[U16M4X2:%.*]] = alloca { <vscale x 16 x i16>, <vscale x 16 x i16> }, align 2
// CHECK-NEXT: [[I32MF2X2:%.*]] = alloca { <vscale x 1 x i32>, <vscale x 1 x i32> }, align 4
// CHECK-NEXT: [[I32MF2X3:%.*]] = alloca { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> }, align 4
// CHECK-NEXT: [[I32MF2X4:%.*]] = alloca { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> }, align 4
// CHECK-NEXT: [[I32MF2X5:%.*]] = alloca { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> }, align 4
// CHECK-NEXT: [[I32MF2X6:%.*]] = alloca { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> }, align 4
// CHECK-NEXT: [[I32MF2X7:%.*]] = alloca { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> }, align 4
// CHECK-NEXT: [[I32MF2X8:%.*]] = alloca { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> }, align 4
// CHECK-NEXT: [[I32M1X2:%.*]] = alloca { <vscale x 2 x i32>, <vscale x 2 x i32> }, align 4
// CHECK-NEXT: [[I32M1X3:%.*]] = alloca { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> }, align 4
// CHECK-NEXT: [[I32M1X4:%.*]] = alloca { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> }, align 4
// CHECK-NEXT: [[I32M1X5:%.*]] = alloca { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> }, align 4
// CHECK-NEXT: [[I32M1X6:%.*]] = alloca { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> }, align 4
// CHECK-NEXT: [[I32M1X7:%.*]] = alloca { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> }, align 4
// CHECK-NEXT: [[I32M1X8:%.*]] = alloca { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> }, align 4
// CHECK-NEXT: [[I32M2X2:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32> }, align 4
// CHECK-NEXT: [[I32M2X3:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 4
// CHECK-NEXT: [[I32M2X4:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 4
// CHECK-NEXT: [[I32M4X2:%.*]] = alloca { <vscale x 8 x i32>, <vscale x 8 x i32> }, align 4
// CHECK-NEXT: [[U32MF2X2:%.*]] = alloca { <vscale x 1 x i32>, <vscale x 1 x i32> }, align 4
// CHECK-NEXT: [[U32MF2X3:%.*]] = alloca { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> }, align 4
// CHECK-NEXT: [[U32MF2X4:%.*]] = alloca { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> }, align 4
// CHECK-NEXT: [[U32MF2X5:%.*]] = alloca { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> }, align 4
// CHECK-NEXT: [[U32MF2X6:%.*]] = alloca { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> }, align 4
// CHECK-NEXT: [[U32MF2X7:%.*]] = alloca { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> }, align 4
// CHECK-NEXT: [[U32MF2X8:%.*]] = alloca { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> }, align 4
// CHECK-NEXT: [[U32M1X2:%.*]] = alloca { <vscale x 2 x i32>, <vscale x 2 x i32> }, align 4
// CHECK-NEXT: [[U32M1X3:%.*]] = alloca { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> }, align 4
// CHECK-NEXT: [[U32M1X4:%.*]] = alloca { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> }, align 4
// CHECK-NEXT: [[U32M1X5:%.*]] = alloca { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> }, align 4
// CHECK-NEXT: [[U32M1X6:%.*]] = alloca { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> }, align 4
// CHECK-NEXT: [[U32M1X7:%.*]] = alloca { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> }, align 4
// CHECK-NEXT: [[U32M1X8:%.*]] = alloca { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> }, align 4
// CHECK-NEXT: [[U32M2X2:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32> }, align 4
// CHECK-NEXT: [[U32M2X3:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 4
// CHECK-NEXT: [[U32M2X4:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 4
// CHECK-NEXT: [[U32M4X2:%.*]] = alloca { <vscale x 8 x i32>, <vscale x 8 x i32> }, align 4
// CHECK-NEXT: [[I64M1X2:%.*]] = alloca { <vscale x 1 x i64>, <vscale x 1 x i64> }, align 8
// CHECK-NEXT: [[I64M1X3:%.*]] = alloca { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> }, align 8
// CHECK-NEXT: [[I64M1X4:%.*]] = alloca { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> }, align 8
// CHECK-NEXT: [[I64M1X5:%.*]] = alloca { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> }, align 8
// CHECK-NEXT: [[I64M1X6:%.*]] = alloca { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> }, align 8
// CHECK-NEXT: [[I64M1X7:%.*]] = alloca { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> }, align 8
// CHECK-NEXT: [[I64M1X8:%.*]] = alloca { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> }, align 8
// CHECK-NEXT: [[I64M2X2:%.*]] = alloca { <vscale x 2 x i64>, <vscale x 2 x i64> }, align 8
// CHECK-NEXT: [[I64M2X3:%.*]] = alloca { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> }, align 8
// CHECK-NEXT: [[I64M2X4:%.*]] = alloca { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> }, align 8
// CHECK-NEXT: [[I64M4X2:%.*]] = alloca { <vscale x 4 x i64>, <vscale x 4 x i64> }, align 8
// CHECK-NEXT: [[U64M1X2:%.*]] = alloca { <vscale x 1 x i64>, <vscale x 1 x i64> }, align 8
// CHECK-NEXT: [[U64M1X3:%.*]] = alloca { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> }, align 8
// CHECK-NEXT: [[U64M1X4:%.*]] = alloca { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> }, align 8
// CHECK-NEXT: [[U64M1X5:%.*]] = alloca { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> }, align 8
// CHECK-NEXT: [[U64M1X6:%.*]] = alloca { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> }, align 8
// CHECK-NEXT: [[U64M1X7:%.*]] = alloca { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> }, align 8
// CHECK-NEXT: [[U64M1X8:%.*]] = alloca { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> }, align 8
// CHECK-NEXT: [[U64M2X2:%.*]] = alloca { <vscale x 2 x i64>, <vscale x 2 x i64> }, align 8
// CHECK-NEXT: [[U64M2X3:%.*]] = alloca { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> }, align 8
// CHECK-NEXT: [[U64M2X4:%.*]] = alloca { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> }, align 8
// CHECK-NEXT: [[U64M4X2:%.*]] = alloca { <vscale x 4 x i64>, <vscale x 4 x i64> }, align 8
// CHECK-NEXT: [[F16MF4X2:%.*]] = alloca { <vscale x 1 x half>, <vscale x 1 x half> }, align 2
// CHECK-NEXT: [[F16MF4X3:%.*]] = alloca { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> }, align 2
// CHECK-NEXT: [[F16MF4X4:%.*]] = alloca { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> }, align 2
// CHECK-NEXT: [[F16MF4X5:%.*]] = alloca { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> }, align 2
// CHECK-NEXT: [[F16MF4X6:%.*]] = alloca { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> }, align 2
// CHECK-NEXT: [[F16MF4X7:%.*]] = alloca { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> }, align 2
// CHECK-NEXT: [[F16MF4X8:%.*]] = alloca { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> }, align 2
// CHECK-NEXT: [[F16MF2X2:%.*]] = alloca { <vscale x 2 x half>, <vscale x 2 x half> }, align 2
// CHECK-NEXT: [[F16MF2X3:%.*]] = alloca { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> }, align 2
// CHECK-NEXT: [[F16MF2X4:%.*]] = alloca { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> }, align 2
// CHECK-NEXT: [[F16MF2X5:%.*]] = alloca { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> }, align 2
// CHECK-NEXT: [[F16MF2X6:%.*]] = alloca { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> }, align 2
// CHECK-NEXT: [[F16MF2X7:%.*]] = alloca { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> }, align 2
// CHECK-NEXT: [[F16MF2X8:%.*]] = alloca { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> }, align 2
// CHECK-NEXT: [[F16M1X2:%.*]] = alloca { <vscale x 4 x half>, <vscale x 4 x half> }, align 2
// CHECK-NEXT: [[F16M1X3:%.*]] = alloca { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> }, align 2
// CHECK-NEXT: [[F16M1X4:%.*]] = alloca { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> }, align 2
// CHECK-NEXT: [[F16M1X5:%.*]] = alloca { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> }, align 2
// CHECK-NEXT: [[F16M1X6:%.*]] = alloca { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> }, align 2
// CHECK-NEXT: [[F16M1X7:%.*]] = alloca { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> }, align 2
// CHECK-NEXT: [[F16M1X8:%.*]] = alloca { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> }, align 2
// CHECK-NEXT: [[F16M2X2:%.*]] = alloca { <vscale x 8 x half>, <vscale x 8 x half> }, align 2
// CHECK-NEXT: [[F16M2X3:%.*]] = alloca { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> }, align 2
// CHECK-NEXT: [[F16M2X4:%.*]] = alloca { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> }, align 2
// CHECK-NEXT: [[F16M4X2:%.*]] = alloca { <vscale x 16 x half>, <vscale x 16 x half> }, align 2
// CHECK-NEXT: [[F32MF2X2:%.*]] = alloca { <vscale x 1 x float>, <vscale x 1 x float> }, align 4
// CHECK-NEXT: [[F32MF2X3:%.*]] = alloca { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> }, align 4
// CHECK-NEXT: [[F32MF2X4:%.*]] = alloca { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> }, align 4
// CHECK-NEXT: [[F32MF2X5:%.*]] = alloca { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> }, align 4
// CHECK-NEXT: [[F32MF2X6:%.*]] = alloca { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> }, align 4
// CHECK-NEXT: [[F32MF2X7:%.*]] = alloca { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> }, align 4
// CHECK-NEXT: [[F32MF2X8:%.*]] = alloca { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> }, align 4
// CHECK-NEXT: [[F32M1X2:%.*]] = alloca { <vscale x 2 x float>, <vscale x 2 x float> }, align 4
// CHECK-NEXT: [[F32M1X3:%.*]] = alloca { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> }, align 4
// CHECK-NEXT: [[F32M1X4:%.*]] = alloca { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> }, align 4
// CHECK-NEXT: [[F32M1X5:%.*]] = alloca { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> }, align 4
// CHECK-NEXT: [[F32M1X6:%.*]] = alloca { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> }, align 4
// CHECK-NEXT: [[F32M1X7:%.*]] = alloca { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> }, align 4
// CHECK-NEXT: [[F32M1X8:%.*]] = alloca { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> }, align 4
// CHECK-NEXT: [[F32M2X2:%.*]] = alloca { <vscale x 4 x float>, <vscale x 4 x float> }, align 4
// CHECK-NEXT: [[F32M2X3:%.*]] = alloca { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }, align 4
// CHECK-NEXT: [[F32M2X4:%.*]] = alloca { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> }, align 4
// CHECK-NEXT: [[F32M4X2:%.*]] = alloca { <vscale x 8 x float>, <vscale x 8 x float> }, align 4
// CHECK-NEXT: [[F64M1X2:%.*]] = alloca { <vscale x 1 x double>, <vscale x 1 x double> }, align 8
// CHECK-NEXT: [[F64M1X3:%.*]] = alloca { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> }, align 8
// CHECK-NEXT: [[F64M1X4:%.*]] = alloca { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> }, align 8
// CHECK-NEXT: [[F64M1X5:%.*]] = alloca { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> }, align 8
// CHECK-NEXT: [[F64M1X6:%.*]] = alloca { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> }, align 8
// CHECK-NEXT: [[F64M1X7:%.*]] = alloca { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> }, align 8
// CHECK-NEXT: [[F64M1X8:%.*]] = alloca { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> }, align 8
// CHECK-NEXT: [[F64M2X2:%.*]] = alloca { <vscale x 2 x double>, <vscale x 2 x double> }, align 8
// CHECK-NEXT: [[F64M2X3:%.*]] = alloca { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }, align 8
// CHECK-NEXT: [[F64M2X4:%.*]] = alloca { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }, align 8
// CHECK-NEXT: [[F64M4X2:%.*]] = alloca { <vscale x 4 x double>, <vscale x 4 x double> }, align 8
// CHECK-NEXT: ret void
//
void foo() {
vbool64_t b64;
vbool32_t b32;
vbool16_t b16;
vbool8_t b8;
vint8mf8_t i8mf8;
vint8mf4_t i8mf4;
vint8mf2_t i8mf2;
vint8m1_t i8m1;
vint8m2_t i8m2;
vint8m4_t i8m4;
vint8m8_t i8m8;
vuint8mf8_t u8mf8;
vuint8mf4_t u8mf4;
vuint8mf2_t u8mf2;
vuint8m1_t u8m1;
vuint8m2_t u8m2;
vuint8m4_t u8m4;
vuint8m8_t u8m8;
vint16mf4_t i16mf4;
vint16mf2_t i16mf2;
vint16m1_t i16m1;
vint16m2_t i16m2;
vint16m4_t i16m4;
vint16m8_t i16m8;
vuint16mf4_t u16mf4;
vuint16mf2_t u16mf2;
vuint16m1_t u16m1;
vuint16m2_t u16m2;
vuint16m4_t u16m4;
vuint16m8_t u16m8;
vint32mf2_t i32mf2;
vint32m1_t i32m1;
vint32m2_t i32m2;
vint32m4_t i32m4;
vint32m8_t i32m8;
vuint32mf2_t u32mf2;
vuint32m1_t u32m1;
vuint32m2_t u32m2;
vuint32m4_t u32m4;
vuint32m8_t u32m8;
vint64m1_t i64m1;
vint64m2_t i64m2;
vint64m4_t i64m4;
vint64m8_t i64m8;
vuint64m1_t u64m1;
vuint64m2_t u64m2;
vuint64m4_t u64m4;
vuint64m8_t u64m8;
vfloat16mf4_t f16mf4;
vfloat16mf2_t f16mf2;
vfloat16m1_t f16m1;
vfloat16m2_t f16m2;
vfloat16m4_t f16m4;
vfloat16m8_t f16m8;
vfloat32mf2_t f32mf2;
vfloat32m1_t f32m1;
vfloat32m2_t f32m2;
vfloat32m4_t f32m4;
vfloat32m8_t f32m8;
vfloat64m1_t f64m1;
vfloat64m2_t f64m2;
vfloat64m4_t f64m4;
vfloat64m8_t f64m8;
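  // Tuple types: v<type><SEW><LMUL>x<NF>_t holds NF vectors of the base type
  // and, as the CHECK lines above show, lowers to a literal struct of NF
  // scalable vectors. Illustrative use with a segment load (commented out so
  // the autogenerated assertions are unaffected):
  //   vint8mf8x2_t v = __riscv_vlseg2e8_v_i8mf8x2(base, vl);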
// i8
vint8mf8x2_t i8mf8x2;
vint8mf8x3_t i8mf8x3;
vint8mf8x4_t i8mf8x4;
vint8mf8x5_t i8mf8x5;
vint8mf8x6_t i8mf8x6;
vint8mf8x7_t i8mf8x7;
vint8mf8x8_t i8mf8x8;
vint8mf4x2_t i8mf4x2;
vint8mf4x3_t i8mf4x3;
vint8mf4x4_t i8mf4x4;
vint8mf4x5_t i8mf4x5;
vint8mf4x6_t i8mf4x6;
vint8mf4x7_t i8mf4x7;
vint8mf4x8_t i8mf4x8;
vint8mf2x2_t i8mf2x2;
vint8mf2x3_t i8mf2x3;
vint8mf2x4_t i8mf2x4;
vint8mf2x5_t i8mf2x5;
vint8mf2x6_t i8mf2x6;
vint8mf2x7_t i8mf2x7;
vint8mf2x8_t i8mf2x8;
vint8m1x2_t i8m1x2;
vint8m1x3_t i8m1x3;
vint8m1x4_t i8m1x4;
vint8m1x5_t i8m1x5;
vint8m1x6_t i8m1x6;
vint8m1x7_t i8m1x7;
vint8m1x8_t i8m1x8;
vint8m2x2_t i8m2x2;
vint8m2x3_t i8m2x3;
vint8m2x4_t i8m2x4;
vint8m4x2_t i8m4x2;
// u8
vuint8mf8x2_t u8mf8x2;
vuint8mf8x3_t u8mf8x3;
vuint8mf8x4_t u8mf8x4;
vuint8mf8x5_t u8mf8x5;
vuint8mf8x6_t u8mf8x6;
vuint8mf8x7_t u8mf8x7;
vuint8mf8x8_t u8mf8x8;
vuint8mf4x2_t u8mf4x2;
vuint8mf4x3_t u8mf4x3;
vuint8mf4x4_t u8mf4x4;
vuint8mf4x5_t u8mf4x5;
vuint8mf4x6_t u8mf4x6;
vuint8mf4x7_t u8mf4x7;
vuint8mf4x8_t u8mf4x8;
vuint8mf2x2_t u8mf2x2;
vuint8mf2x3_t u8mf2x3;
vuint8mf2x4_t u8mf2x4;
vuint8mf2x5_t u8mf2x5;
vuint8mf2x6_t u8mf2x6;
vuint8mf2x7_t u8mf2x7;
vuint8mf2x8_t u8mf2x8;
vuint8m1x2_t u8m1x2;
vuint8m1x3_t u8m1x3;
vuint8m1x4_t u8m1x4;
vuint8m1x5_t u8m1x5;
vuint8m1x6_t u8m1x6;
vuint8m1x7_t u8m1x7;
vuint8m1x8_t u8m1x8;
vuint8m2x2_t u8m2x2;
vuint8m2x3_t u8m2x3;
vuint8m2x4_t u8m2x4;
vuint8m4x2_t u8m4x2;
// i16
vint16mf4x2_t i16mf4x2;
vint16mf4x3_t i16mf4x3;
vint16mf4x4_t i16mf4x4;
vint16mf4x5_t i16mf4x5;
vint16mf4x6_t i16mf4x6;
vint16mf4x7_t i16mf4x7;
vint16mf4x8_t i16mf4x8;
vint16mf2x2_t i16mf2x2;
vint16mf2x3_t i16mf2x3;
vint16mf2x4_t i16mf2x4;
vint16mf2x5_t i16mf2x5;
vint16mf2x6_t i16mf2x6;
vint16mf2x7_t i16mf2x7;
vint16mf2x8_t i16mf2x8;
vint16m1x2_t i16m1x2;
vint16m1x3_t i16m1x3;
vint16m1x4_t i16m1x4;
vint16m1x5_t i16m1x5;
vint16m1x6_t i16m1x6;
vint16m1x7_t i16m1x7;
vint16m1x8_t i16m1x8;
vint16m2x2_t i16m2x2;
vint16m2x3_t i16m2x3;
vint16m2x4_t i16m2x4;
vint16m4x2_t i16m4x2;
// u16
vuint16mf4x2_t u16mf4x2;
vuint16mf4x3_t u16mf4x3;
vuint16mf4x4_t u16mf4x4;
vuint16mf4x5_t u16mf4x5;
vuint16mf4x6_t u16mf4x6;
vuint16mf4x7_t u16mf4x7;
vuint16mf4x8_t u16mf4x8;
vuint16mf2x2_t u16mf2x2;
vuint16mf2x3_t u16mf2x3;
vuint16mf2x4_t u16mf2x4;
vuint16mf2x5_t u16mf2x5;
vuint16mf2x6_t u16mf2x6;
vuint16mf2x7_t u16mf2x7;
vuint16mf2x8_t u16mf2x8;
vuint16m1x2_t u16m1x2;
vuint16m1x3_t u16m1x3;
vuint16m1x4_t u16m1x4;
vuint16m1x5_t u16m1x5;
vuint16m1x6_t u16m1x6;
vuint16m1x7_t u16m1x7;
vuint16m1x8_t u16m1x8;
vuint16m2x2_t u16m2x2;
vuint16m2x3_t u16m2x3;
vuint16m2x4_t u16m2x4;
vuint16m4x2_t u16m4x2;
// i32
vint32mf2x2_t i32mf2x2;
vint32mf2x3_t i32mf2x3;
vint32mf2x4_t i32mf2x4;
vint32mf2x5_t i32mf2x5;
vint32mf2x6_t i32mf2x6;
vint32mf2x7_t i32mf2x7;
vint32mf2x8_t i32mf2x8;
vint32m1x2_t i32m1x2;
vint32m1x3_t i32m1x3;
vint32m1x4_t i32m1x4;
vint32m1x5_t i32m1x5;
vint32m1x6_t i32m1x6;
vint32m1x7_t i32m1x7;
vint32m1x8_t i32m1x8;
vint32m2x2_t i32m2x2;
vint32m2x3_t i32m2x3;
vint32m2x4_t i32m2x4;
vint32m4x2_t i32m4x2;
// u32
vuint32mf2x2_t u32mf2x2;
vuint32mf2x3_t u32mf2x3;
vuint32mf2x4_t u32mf2x4;
vuint32mf2x5_t u32mf2x5;
vuint32mf2x6_t u32mf2x6;
vuint32mf2x7_t u32mf2x7;
vuint32mf2x8_t u32mf2x8;
vuint32m1x2_t u32m1x2;
vuint32m1x3_t u32m1x3;
vuint32m1x4_t u32m1x4;
vuint32m1x5_t u32m1x5;
vuint32m1x6_t u32m1x6;
vuint32m1x7_t u32m1x7;
vuint32m1x8_t u32m1x8;
vuint32m2x2_t u32m2x2;
vuint32m2x3_t u32m2x3;
vuint32m2x4_t u32m2x4;
vuint32m4x2_t u32m4x2;
  // i64
vint64m1x2_t i64m1x2;
vint64m1x3_t i64m1x3;
vint64m1x4_t i64m1x4;
vint64m1x5_t i64m1x5;
vint64m1x6_t i64m1x6;
vint64m1x7_t i64m1x7;
vint64m1x8_t i64m1x8;
vint64m2x2_t i64m2x2;
vint64m2x3_t i64m2x3;
vint64m2x4_t i64m2x4;
vint64m4x2_t i64m4x2;
// u64
vuint64m1x2_t u64m1x2;
vuint64m1x3_t u64m1x3;
vuint64m1x4_t u64m1x4;
vuint64m1x5_t u64m1x5;
vuint64m1x6_t u64m1x6;
vuint64m1x7_t u64m1x7;
vuint64m1x8_t u64m1x8;
vuint64m2x2_t u64m2x2;
vuint64m2x3_t u64m2x3;
vuint64m2x4_t u64m2x4;
vuint64m4x2_t u64m4x2;
// f16
vfloat16mf4x2_t f16mf4x2;
vfloat16mf4x3_t f16mf4x3;
vfloat16mf4x4_t f16mf4x4;
vfloat16mf4x5_t f16mf4x5;
vfloat16mf4x6_t f16mf4x6;
vfloat16mf4x7_t f16mf4x7;
vfloat16mf4x8_t f16mf4x8;
vfloat16mf2x2_t f16mf2x2;
vfloat16mf2x3_t f16mf2x3;
vfloat16mf2x4_t f16mf2x4;
vfloat16mf2x5_t f16mf2x5;
vfloat16mf2x6_t f16mf2x6;
vfloat16mf2x7_t f16mf2x7;
vfloat16mf2x8_t f16mf2x8;
vfloat16m1x2_t f16m1x2;
vfloat16m1x3_t f16m1x3;
vfloat16m1x4_t f16m1x4;
vfloat16m1x5_t f16m1x5;
vfloat16m1x6_t f16m1x6;
vfloat16m1x7_t f16m1x7;
vfloat16m1x8_t f16m1x8;
vfloat16m2x2_t f16m2x2;
vfloat16m2x3_t f16m2x3;
vfloat16m2x4_t f16m2x4;
vfloat16m4x2_t f16m4x2;
// f32
vfloat32mf2x2_t f32mf2x2;
vfloat32mf2x3_t f32mf2x3;
vfloat32mf2x4_t f32mf2x4;
vfloat32mf2x5_t f32mf2x5;
vfloat32mf2x6_t f32mf2x6;
vfloat32mf2x7_t f32mf2x7;
vfloat32mf2x8_t f32mf2x8;
vfloat32m1x2_t f32m1x2;
vfloat32m1x3_t f32m1x3;
vfloat32m1x4_t f32m1x4;
vfloat32m1x5_t f32m1x5;
vfloat32m1x6_t f32m1x6;
vfloat32m1x7_t f32m1x7;
vfloat32m1x8_t f32m1x8;
vfloat32m2x2_t f32m2x2;
vfloat32m2x3_t f32m2x3;
vfloat32m2x4_t f32m2x4;
vfloat32m4x2_t f32m4x2;
  // f64
vfloat64m1x2_t f64m1x2;
vfloat64m1x3_t f64m1x3;
vfloat64m1x4_t f64m1x4;
vfloat64m1x5_t f64m1x5;
vfloat64m1x6_t f64m1x6;
vfloat64m1x7_t f64m1x7;
vfloat64m1x8_t f64m1x8;
vfloat64m2x2_t f64m2x2;
vfloat64m2x3_t f64m2x3;
vfloat64m2x4_t f64m2x4;
vfloat64m4x2_t f64m4x2;
}